In [1]:
import tensorflow as tf
In [2]:
tf.__version__
Out[2]:
In [3]:
from tensorflow.contrib import keras
In [1]:
from keras.datasets import cifar100
(X_train, Y_train), (X_test, Y_test) = cifar100.load_data(label_mode='fine')
In [2]:
from keras import backend as K
img_rows, img_cols = 32, 32
if K.image_data_format() == 'channels_first':
    shape_ord = (3, img_rows, img_cols)
else:  # channels_last
    shape_ord = (img_rows, img_cols, 3)
In [3]:
shape_ord
Out[3]:
In [4]:
X_train.shape
Out[4]:
In [5]:
import numpy as np
nb_classes = len(np.unique(Y_train))
In [6]:
from keras.applications import vgg16
from keras.layers import Input
In [7]:
vgg16_model = vgg16.VGG16(weights='imagenet', include_top=False,
                          input_tensor=Input(shape_ord))
vgg16_model.summary()
In [8]:
for layer in vgg16_model.layers:
    layer.trainable = False  # freeze layer
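Freezing the convolutional base keeps the pre-trained ImageNet weights fixed, so only the new classifier head added below is trained. As a quick sanity check (a minimal sketch, using only the standard Keras layer attributes trainable and count_params()):
# Sketch: confirm that no VGG16 layer will be updated during training.
assert all(not layer.trainable for layer in vgg16_model.layers)
frozen_params = sum(layer.count_params() for layer in vgg16_model.layers)
print("Frozen (non-trainable) parameters in the VGG16 base:", frozen_params)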
In [9]:
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
In [10]:
x = Flatten()(vgg16_model.output)
x = Dense(4096, activation='relu', name='ft_fc1')(x)
x = Dropout(0.5)(x)
x = BatchNormalization()(x)
predictions = Dense(nb_classes, activation='softmax')(x)
In [11]:
from keras.models import Model
In [12]:
#create graph of your new model
model = Model(inputs=vgg16_model.input, outputs=predictions)
#compile the model
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
In [13]:
model.summary()
In [14]:
from keras.callbacks import TensorBoard
# Arguments
log_dir: the path of the directory where to save the log files
    to be parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation
    and weight histograms for the layers of the model. If set to 0,
    histograms won't be computed. Validation data (or split) must be
    specified for histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard.
    The log file can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
    `histogram_freq` must be greater than 0.
write_images: whether to write model weights to be visualized as an
    image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
    layers will be saved.
embeddings_layer_names: a list of names of layers to keep an eye on.
    If None or an empty list, all the embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer names to file names
    in which the metadata for the embedding layer is saved. See the
    details about the metadata file format. If the same metadata file
    is used for all embedding layers, a string can be passed.
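As a minimal standalone sketch (the log directory and frequencies below are illustrative values, not the settings used later in this notebook), the callback can be instantiated like this:
# Illustrative sketch: log histograms every 5 epochs and write the graph.
# Note: histogram_freq > 0 requires validation data in fit() / fit_generator().
tb = TensorBoard(log_dir='./tb_example_logs', histogram_freq=5,
                 write_graph=True, write_images=False)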
In [15]:
## One-hot encoding of the labels (100 classes)
from keras.utils import np_utils
Y_train.shape
Out[15]:
In [16]:
Y_train = np_utils.to_categorical(Y_train)
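As a toy illustration of what to_categorical does (not part of the CIFAR-100 pipeline), a single integer label becomes a one-hot row vector:
# e.g. label 3 out of 5 classes -> array([[ 0.,  0.,  0.,  1.,  0.]])
np_utils.to_categorical([3], num_classes=5)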
In [17]:
Y_train.shape
Out[17]:
In [18]:
def generate_batches(X, Y, batch_size=128):
    """Yield (X, Y) batches of size batch_size, cycling over the data indefinitely."""
    # The generator has to run indefinitely; wrap around once the data is exhausted
    start = 0
    while True:
        yield (X[start:start+batch_size], Y[start:start+batch_size])
        start = (start + batch_size) % X.shape[0]
batch_size = 64
steps_per_epoch = X_train.shape[0] // batch_size  # integer number of batches per epoch
model.fit_generator(generate_batches(X_train, Y_train, batch_size=batch_size),
                    steps_per_epoch=steps_per_epoch, epochs=20, verbose=1,
                    callbacks=[TensorBoard(log_dir='./tf_logs', histogram_freq=10,
                                           write_graph=True, write_images=True,
                                           embeddings_freq=10,
                                           embeddings_layer_names=['block1_conv2',
                                                                   'block5_conv1',
                                                                   'ft_fc1'],
                                           embeddings_metadata=None)])
Out[18]:
In [ ]:
%%bash
python -m tensorflow.tensorboard --logdir=./tf_logs
In [1]:
import operator
import threading
from functools import reduce
import keras
import keras.backend as K
from keras.engine import Model
import numpy as np
import tensorflow as tf
import time
from keras.layers import Conv2D
from tqdm import tqdm
In [2]:
def prod(factors):
    return reduce(operator.mul, factors, 1)
In [ ]:
TRAINING = True
with K.get_session() as sess:
    shp = [10, 200, 200, 3]
    shp1 = [10, 7, 7, 80]
    inp = K.placeholder(shp)
    inp1 = K.placeholder(shp1)
    # FIFO queue holding up to 20 (image batch, target batch) pairs
    queue = tf.FIFOQueue(20, [tf.float32, tf.float32], [shp, shp1])
    x1, y1 = queue.dequeue()
    enqueue = queue.enqueue([inp, inp1])
    # Build ResNet50 directly on the dequeued tensor (include_top=False)
    model = keras.applications.ResNet50(False, "imagenet", x1, shp[1:])
    for i in range(3):
        model.layers.pop()
    model.layers[-1].outbound_nodes = []
    model.outputs = [model.layers[-1].output]
    output = model.outputs[0]  # 7x7 feature map
    # Reduce filter size to avoid OOM
    output = Conv2D(32, (1, 1), padding="same", activation='relu')(output)
    # YOLO-style output: B * (4 + nb_classes + 1) channels
    output3 = Conv2D(5 * (4 + 11 + 1), (1, 1), padding="same", activation='relu')(output)
    cost = tf.reduce_sum(tf.abs(output3 - y1))
    optimizer = tf.train.RMSPropOptimizer(0.001).minimize(cost)
    sess.run(tf.global_variables_initializer())

    def get_input():
        # Stand-in for a super long processing / I/O step
        return (np.arange(prod(shp)).reshape(shp).astype(np.float32),
                np.arange(prod(shp1)).reshape(shp1).astype(np.float32))

    def generate(coord, enqueue_op):
        while not coord.should_stop():
            inp_feed, inp1_feed = get_input()
            sess.run(enqueue_op, feed_dict={inp: inp_feed, inp1: inp1_feed})

    # Baseline: feed every batch through feed_dict (no queue)
    start = time.time()
    for i in tqdm(range(10)):  # epochs
        for j in range(30):  # batches
            x, y = get_input()
            optimizer_, s = sess.run([optimizer, queue.size()],
                                     feed_dict={x1: x, y1: y, K.learning_phase(): int(TRAINING)})
    print("Took : ", time.time() - start)

    # Now fill the queue from 10 background threads and train from it
    coordinator = tf.train.Coordinator()
    threads = [threading.Thread(target=generate, args=(coordinator, enqueue)) for i in range(10)]
    for t in threads:
        t.start()

    start = time.time()
    for i in tqdm(range(10)):  # epochs
        for j in range(30):  # batches
            optimizer_, s = sess.run([optimizer, queue.size()],
                                     feed_dict={K.learning_phase(): int(TRAINING)})
    print("Took : ", time.time() - start)

    def clear_queue(queue, threads):
        # Keep dequeuing until all producer threads have exited,
        # so none of them stays blocked on a full queue
        while any([t.is_alive() for t in threads]):
            _, s = sess.run([queue.dequeue(), queue.size()])
            print(s)

    coordinator.request_stop()
    clear_queue(queue, threads)
    coordinator.join(threads)
    print("DONE Queue")